@InProceedings{MoraesEvPeToFeMa:2023:HeReLi,
               author = "Moraes, Rog{\'e}rio Ferreira de and Evangelista, Raphael dos S. 
                         and Pereira, Andre Luiz da S. and Toledo, Yanexis Pupo and 
                         Fernandes, Leandro A. F. and Mart{\'{\i}}, Luis",
          affiliation = "Universidade Federal Fluminense (UFF), Niter{\'o}i, Brazil and 
                         Universidade Federal Fluminense (UFF), Niter{\'o}i, Brazil and 
                         Universidade Federal Fluminense (UFF), Niter{\'o}i, Brazil and 
                         Universidade Federal Fluminense (UFF), Niter{\'o}i, Brazil and 
                         Universidade Federal Fluminense (UFF), Niter{\'o}i, Brazil and 
                         Inria Chile Research Center, Las Condes, Chile",
                title = "Heuristics to reduce linear combinations of activation functions 
                         to improve image classification",
            booktitle = "Proceedings...",
                 year = "2023",
               editor = "Clua, Esteban Walter Gonzalez and K{\"o}rting, Thales Sehn and 
                         Paulovich, Fernando Vieira and Feris, Rogerio",
         organization = "Conference on Graphics, Patterns and Images, 36. (SIBGRAPI)",
             keywords = "learned activation function, trainable activation function, linear 
                         combination of activation functions.",
             abstract = "Image classification is one of the classical problems in computer 
                         vision, and CNNs (Convolutional Neural Networks) are widely used 
                         for this task. However, the choice of a CNN can vary depending on 
                          the chosen dataset. In this context, trainable activation 
                          functions, which adapt to the data, play a crucial role in 
                          CNNs. One 
                         technique for constructing these functions is to write them as a 
                         linear combination of other activation functions, where the 
                         coefficients of this combination are learned during training. 
                          However, when the number of activation functions to combine is 
                          large, the computational cost can be very high, and manually 
                          testing and choosing among them may be impractical. To alleviate the 
                         difficulty of choosing which activation functions should be part 
                         of the linear combination, we propose two heuristics: Linear 
                         Combination Approximator by Coefficients (LCAC) and Major and 
                         Uniform Coefficient Extractor (MUCE). Our heuristics provide an 
                          efficient selection of a subset of activation functions whose 
                          results are better than or equivalent to those of the linear 
                          combination of all 34 activation functions available in our 
                          experiments (C34) on the image classification problem. Compared 
                          to the C34 function, the LCAC function was better or equivalent 
                          in 62.5% of the conducted experiments, and the MUCE function in 
                          87.5%.",
  conference-location = "Rio Grande, RS",
      conference-year = "Nov. 06-09, 2023",
                  doi = "10.1109/SIBGRAPI59091.2023.10347043",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI59091.2023.10347043",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/49KDKC2",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/49KDKC2",
           targetfile = "Moraes-paper50.pdf",
        urlaccessdate = "2024, Apr. 29"
}
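
The abstract describes trainable activations built as a linear combination of fixed base activation functions, with the combination coefficients learned alongside the network weights. Below is a minimal sketch of that idea only; PyTorch is an assumed framework, the class name is illustrative, and neither the paper's pool of 34 activation functions nor the LCAC/MUCE selection heuristics are reproduced here.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class LinearCombinationActivation(nn.Module):
        """Learned linear combination of fixed base activations (illustrative sketch)."""

        def __init__(self, base_fns):
            super().__init__()
            self.base_fns = base_fns
            # One trainable coefficient per base activation, optimized jointly with the network.
            self.coeffs = nn.Parameter(torch.full((len(base_fns),), 1.0 / len(base_fns)))

        def forward(self, x):
            # phi(x) = sum_i c_i * f_i(x)
            return sum(c * fn(x) for c, fn in zip(self.coeffs, self.base_fns))

    # Illustrative subset of base activations (not the paper's 34-function pool).
    act = LinearCombinationActivation([torch.relu, torch.tanh, torch.sigmoid, F.silu])
    y = act(torch.randn(8, 16))

After training, selection heuristics such as the paper's LCAC and MUCE would examine the learned coefficients to retain only a small subset of the base functions; that selection step is not shown in the sketch above.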

